/*** STACK LOCATION ***/
ENTRY(stack_start)
- .long SYMBOL_NAME(cpu0_stack) + 4000 - __PAGE_OFFSET
+ .long SYMBOL_NAME(cpu0_stack) + 8100 - __PAGE_OFFSET
.long __HYPERVISOR_DS
/*** DESCRIPTOR TABLES ***/
.quad 0x0000000000000000 /* unused */
.fill 2*NR_CPUS,8,0 /* space for TSS and LDT per CPU */
-# The following adds 8-12kB to the kernel file size.
.org 0x1000
-ENTRY(idle_pg_table)
+ENTRY(idle_pg_table) # Initial page directory is 4kB
.org 0x2000
-ENTRY(cpu0_stack)
- .org 0x3000
+ENTRY(cpu0_stack) # Initial stack is 8kB
+ .org 0x4000
ENTRY(stext)
ENTRY(_stext)
extern int do_timer_lists_from_pit;
unsigned long low_mem_size;
+#ifdef STACK_GUARD
+ extern unsigned long cpu0_stack[];
+ l1_pgentry_t *l1;
+ l2_pgentry_t *l2;
+ int i, j;
+
+ /* When stack-guarding, Xen's heap cannot be mapped by super pages. */
+ for ( i = 0; i < (MAX_MONITOR_ADDRESS >> L2_PAGETABLE_SHIFT); i++ )
+ {
+ l1 = (l1_pgentry_t *)get_free_page(GFP_KERNEL);
+ for ( j = 0; j < ENTRIES_PER_L1_PAGETABLE; j++ )
+ l1[j] = mk_l1_pgentry((i << L2_PAGETABLE_SHIFT) |
+ (j << L1_PAGETABLE_SHIFT) |
+ PAGE_HYPERVISOR);
+ idle_pg_table[i] = idle_pg_table[i + l2_table_offset(PAGE_OFFSET)] =
+ mk_l2_pgentry(virt_to_phys(l1) | PAGE_HYPERVISOR);
+ }
+
+ /* Unmap the first page of CPU0's stack: it becomes the guard page that faults on stack overflow. */
+ l2 = &idle_pg_table[l2_table_offset(virt_to_phys(cpu0_stack))];
+ l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(virt_to_phys(cpu0_stack));
+ *l1 = mk_l1_pgentry(0);
+#endif
+
if ( opt_watchdog )
nmi_watchdog = NMI_LOCAL_APIC;
struct task_struct *idle;
unsigned long boot_error = 0;
int timeout, cpu;
- unsigned long start_eip;
+ unsigned long start_eip, stack;
cpu = ++cpucount;
/* So we see what's up. */
printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
- stack_start.esp = __pa(get_free_page(GFP_KERNEL)) + 4000;
+
+ stack = __pa(__get_free_pages(GFP_KERNEL, 1));
+#ifdef STACK_GUARD
+ {
+ /* Unmap the first page of the new CPU's stack: it becomes the guard page that faults on stack overflow. */
+ l2_pgentry_t *l2 = &idle_pg_table[l2_table_offset(stack)];
+ l1_pgentry_t *l1 = l2_pgentry_to_l1(*l2) + l1_table_offset(stack);
+ *l1 = mk_l1_pgentry(0);
+ }
+#endif
+ stack_start.esp = stack + STACK_SIZE - STACK_RESERVED;
/*
* This grunge runs the startup process for
struct task_struct;
+#define STACK_RESERVED \
+ (sizeof(execution_context_t) + sizeof(struct task_struct *))
+
static inline struct task_struct * get_current(void)
{
struct task_struct *current;
__asm__ ( "orl %%esp,%0; andl $~3,%0; movl (%0),%0"
- : "=r" (current) : "0" (4092UL) );
+ : "=r" (current) : "0" (STACK_SIZE-4) );
return current;
}
static inline void set_current(struct task_struct *p)
{
__asm__ ( "orl %%esp,%0; andl $~3,%0; movl %1,(%0)"
- : : "r" (4092UL), "r" (p) );
+ : : "r" (STACK_SIZE-4), "r" (p) );
}
static inline execution_context_t *get_execution_context(void)
{
execution_context_t *execution_context;
- __asm__ ( "andl %%esp,%0; addl $4096-72,%0"
- : "=r" (execution_context) : "0" (~4095UL) );
+ __asm__ ( "andl %%esp,%0; addl %2,%0"
+ : "=r" (execution_context)
+ : "0" (~(STACK_SIZE-1)), "i" (STACK_SIZE-STACK_RESERVED) );
return execution_context;
}
{
unsigned long p;
__asm__ ( "orl %%esp,%0; andl $~3,%0"
- : "=r" (p) : "0" (4092UL) );
+ : "=r" (p) : "0" (STACK_SIZE-4) );
return p;
}
#define schedule_tail(_p) \
__asm__ __volatile__ ( \
- "andl %%esp,%0; addl $4096-72,%0; movl %0,%%esp; jmp *%1" \
- : : "r" (~4095UL), "r" (unlikely(is_idle_task((_p))) ? \
+ "andl %%esp,%0; addl %2,%0; movl %0,%%esp; jmp *%1" \
+ : : "r" (~(STACK_SIZE-1)), \
+ "r" (unlikely(is_idle_task((_p))) ? \
continue_cpu_idle_loop : \
- continue_nonidle_task) )
+ continue_nonidle_task), \
+ "i" (STACK_SIZE-STACK_RESERVED) )
#endif /* !(_I386_CURRENT_H) */
#ifndef NDEBUG
#define DPRINTK(_f, _a...) printk("(file=%s, line=%d) " _f, \
__FILE__, __LINE__, ## _a)
+#define STACK_GUARD
#else
#define DPRINTK(_f, _a...) ((void)0)
#endif
#include <asm/ptrace.h>
#include <xeno/smp.h>
#include <asm/processor.h>
-#include <asm/current.h>
#include <hypervisor-ifs/hypervisor-if.h>
#include <hypervisor-ifs/dom0_ops.h>
#include <xeno/delay.h>
#include <xeno/rbtree.h>
+#define STACK_SIZE (2*PAGE_SIZE)
+#include <asm/current.h>
+
#define MAX_DOMAIN_NAME 16
extern unsigned long volatile jiffies;
#define IDLE_DOMAIN_ID (~0)
#define is_idle_task(_p) ((_p)->domain == IDLE_DOMAIN_ID)
-#define STACK_SIZE PAGE_SIZE
-
#include <xeno/slab.h>
extern kmem_cache_t *task_struct_cachep;